Brain Tumour Classification using CNN¶

In [ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import os
import itertools
import  seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import tensorflow as tf
from tensorflow.keras import layers, models, regularizers
from tensorflow.python.ops.gen_batch_ops import batch
from keras.callbacks import ModelCheckpoint, EarlyStopping
In [ ]:
IMAGE_SIZE = 236
BATCH_SIZE = 32
EPOCHS = 25
CHANNELS = 3
In [ ]:
train_data = tf.keras.preprocessing.image_dataset_from_directory(
    'C:/Users/Oabo Nwako/Desktop/Study files/Business Intelligence and Data Analytics/Practise/BrainTumorDetection/Training',
    image_size = (IMAGE_SIZE,IMAGE_SIZE),
    shuffle = True,
    batch_size = BATCH_SIZE,
)
Found 2870 files belonging to 4 classes.
In [ ]:
class_names = train_data.class_names
class_names
Out[ ]:
['glioma_tumor', 'meningioma_tumor', 'no_tumor', 'pituitary_tumor']
In [ ]:
len(train_data)
Out[ ]:
90
In [ ]:
actual_size = len(train_data) * 32
actual_size
Out[ ]:
2880
In [ ]:
#Take shape of the first image
for image_batch, label_batch in train_data.take(1):
    print(image_batch[0].shape, class_names[label_batch[0]])
(236, 236, 3) pituitary_tumor
In [ ]:
class_names[label_batch[2]]
Out[ ]:
'no_tumor'
In [ ]:
for image_batch, label_batch in train_data.take(1):
    plt.imshow(image_batch[0].numpy().astype("uint8"))
    plt.title(class_names[label_batch[0]])
    plt.axis('off')
In [ ]:
#Display 9 MRI images
plt.figure(figsize=(12,7))

for image_batch, label_batch in train_data.take(1):
    for i in range(9):
        ax = plt.subplot(3,3,i+1)
        plt.imshow(image_batch[i].numpy().astype("uint8"))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')

Testing dataset¶

In [ ]:
#loading test dataset
test_data = tf.keras.preprocessing.image_dataset_from_directory(
    'C:/Users/Oabo Nwako/Desktop/Study files/Business Intelligence and Data Analytics/Practise/BrainTumorDetection/Testing',
    image_size = (IMAGE_SIZE,IMAGE_SIZE),
    shuffle = True,
    batch_size = BATCH_SIZE,
)
Found 394 files belonging to 4 classes.
In [ ]:
#See classes
ttclass_names = test_data.class_names
ttclass_names
Out[ ]:
['glioma_tumor', 'meningioma_tumor', 'no_tumor', 'pituitary_tumor']
In [ ]:
#actual length = len of test_data * batch size
x = len(test_data) * BATCH_SIZE
x
Out[ ]:
416
In [ ]:
#shape of first image
for image_batch, label_batch in test_data.take(1):
    print(image_batch[0].shape, ttclass_names[label_batch[0]])
(236, 236, 3) glioma_tumor
In [ ]:
#First 6 images
plt.figure(figsize=(15,7))

for image_batch, label_batch in test_data.take(1):
    for i in range(6):
        ax = plt.subplot(2, 3, i+1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(ttclass_names[label_batch[i]])
        plt.axis('off')
In [ ]:
 

Train Test Split¶

In [ ]:
# a function to create for validation split
def get_dataset_partitions(train, val_split=0.2, train_split=0.8, shuffle=True, shuffle_size=10000):
    ds_size = len(train)
    if shuffle:
        ds = train.shuffle(shuffle_size, seed=15)
        
    train_size = int(ds_size * train_split)
    val_size = int(ds_size * val_split)
    
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    
    return train_ds, val_ds
        
In [ ]:
#create validation set from the dataset
train_ds, val_ds = get_dataset_partitions(train_data)
test_ds = test_data
In [ ]:
print(len(train_ds), len(val_ds), len(test_ds))
72 18 13

Preprocessing¶

In [ ]:
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size= tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
In [ ]:
#resizing and scaling
resize_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
    layers.experimental.preprocessing.Rescaling(1.0/255) #ensures pixel value is 0-1,
])
In [ ]:
#data augmentation
data_augment = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3),
])

Build Model¶

In [ ]:
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 4

model = models.Sequential([
    resize_rescale,
    data_augment,
    layers.Conv2D(32, kernel_size=(3,3), activation = 'relu', input_shape = input_shape),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')
])

model.build(input_shape = input_shape)
In [ ]:
model.summary()
Model: "sequential_2"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 sequential (Sequential)     (32, 236, 236, 3)         0         
                                                                 
 sequential_1 (Sequential)   (32, 236, 236, 3)         0         
                                                                 
 conv2d (Conv2D)             (32, 234, 234, 32)        896       
                                                                 
 max_pooling2d (MaxPooling2D  (32, 117, 117, 32)       0         
 )                                                               
                                                                 
 conv2d_1 (Conv2D)           (32, 115, 115, 64)        18496     
                                                                 
 max_pooling2d_1 (MaxPooling  (32, 57, 57, 64)         0         
 2D)                                                             
                                                                 
 conv2d_2 (Conv2D)           (32, 55, 55, 64)          36928     
                                                                 
 max_pooling2d_2 (MaxPooling  (32, 27, 27, 64)         0         
 2D)                                                             
                                                                 
 conv2d_3 (Conv2D)           (32, 25, 25, 64)          36928     
                                                                 
 max_pooling2d_3 (MaxPooling  (32, 12, 12, 64)         0         
 2D)                                                             
                                                                 
 conv2d_4 (Conv2D)           (32, 10, 10, 64)          36928     
                                                                 
 max_pooling2d_4 (MaxPooling  (32, 5, 5, 64)           0         
 2D)                                                             
                                                                 
 conv2d_5 (Conv2D)           (32, 3, 3, 64)            36928     
                                                                 
 max_pooling2d_5 (MaxPooling  (32, 1, 1, 64)           0         
 2D)                                                             
                                                                 
 flatten (Flatten)           (32, 64)                  0         
                                                                 
 dense (Dense)               (32, 64)                  4160      
                                                                 
 dense_1 (Dense)             (32, 4)                   260       
                                                                 
=================================================================
Total params: 171,524
Trainable params: 171,524
Non-trainable params: 0
_________________________________________________________________
In [ ]:
#optimisation
model.compile (
    optimizer = 'Adam',
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics = ['accuracy'],
)

Train Model¶

Model 1¶

In [ ]:
#Define early stopping
early_stopping = EarlyStopping(monitor = "loss", patience=5, min_delta=0, mode="auto", restore_best_weights = True)
checkpoint = ModelCheckpoint('best_model.h5', monitor="loss", save_best_only = True)
In [ ]:
history = model.fit(
    train_ds,
    batch_size = BATCH_SIZE,
    verbose= 1,
    epochs = EPOCHS,
    validation_data = val_ds,
    callbacks = [early_stopping,checkpoint]
)
Epoch 1/25
72/72 [==============================] - 272s 3s/step - loss: 1.3272 - accuracy: 0.3391 - val_loss: 1.2524 - val_accuracy: 0.3438
Epoch 2/25
72/72 [==============================] - 159s 2s/step - loss: 1.1621 - accuracy: 0.4738 - val_loss: 1.0657 - val_accuracy: 0.5590
Epoch 3/25
72/72 [==============================] - 160s 2s/step - loss: 1.0448 - accuracy: 0.5462 - val_loss: 0.9696 - val_accuracy: 0.5747
Epoch 4/25
72/72 [==============================] - 165s 2s/step - loss: 0.9141 - accuracy: 0.6142 - val_loss: 0.7993 - val_accuracy: 0.7031
Epoch 5/25
72/72 [==============================] - 163s 2s/step - loss: 0.9283 - accuracy: 0.5950 - val_loss: 0.9043 - val_accuracy: 0.6632
Epoch 6/25
72/72 [==============================] - 161s 2s/step - loss: 0.8474 - accuracy: 0.6495 - val_loss: 0.8520 - val_accuracy: 0.6632
Epoch 7/25
72/72 [==============================] - 164s 2s/step - loss: 0.7957 - accuracy: 0.6713 - val_loss: 0.7142 - val_accuracy: 0.7031
Epoch 8/25
72/72 [==============================] - 167s 2s/step - loss: 0.7224 - accuracy: 0.6853 - val_loss: 0.6698 - val_accuracy: 0.7378
Epoch 9/25
72/72 [==============================] - 165s 2s/step - loss: 0.7087 - accuracy: 0.7071 - val_loss: 0.7012 - val_accuracy: 0.7153
Epoch 10/25
72/72 [==============================] - 162s 2s/step - loss: 0.7055 - accuracy: 0.7066 - val_loss: 1.0161 - val_accuracy: 0.5781
Epoch 11/25
72/72 [==============================] - 164s 2s/step - loss: 0.6450 - accuracy: 0.7323 - val_loss: 0.6565 - val_accuracy: 0.7552
Epoch 12/25
72/72 [==============================] - 168s 2s/step - loss: 0.6139 - accuracy: 0.7611 - val_loss: 0.6193 - val_accuracy: 0.7622
Epoch 13/25
72/72 [==============================] - 159s 2s/step - loss: 0.5765 - accuracy: 0.7585 - val_loss: 0.5298 - val_accuracy: 0.8142
Epoch 14/25
72/72 [==============================] - 161s 2s/step - loss: 0.5472 - accuracy: 0.7733 - val_loss: 0.5207 - val_accuracy: 0.7986
Epoch 15/25
72/72 [==============================] - 165s 2s/step - loss: 0.5106 - accuracy: 0.7956 - val_loss: 0.4885 - val_accuracy: 0.8247
Epoch 16/25
72/72 [==============================] - 163s 2s/step - loss: 0.5036 - accuracy: 0.8034 - val_loss: 0.4889 - val_accuracy: 0.8038
Epoch 17/25
72/72 [==============================] - 160s 2s/step - loss: 0.4931 - accuracy: 0.7995 - val_loss: 0.4105 - val_accuracy: 0.8281
Epoch 18/25
72/72 [==============================] - 165s 2s/step - loss: 0.4807 - accuracy: 0.8104 - val_loss: 0.4754 - val_accuracy: 0.8056
Epoch 19/25
72/72 [==============================] - 160s 2s/step - loss: 0.4497 - accuracy: 0.8143 - val_loss: 0.4388 - val_accuracy: 0.8264
Epoch 20/25
72/72 [==============================] - 161s 2s/step - loss: 0.4579 - accuracy: 0.8252 - val_loss: 0.4699 - val_accuracy: 0.8212
Epoch 21/25
72/72 [==============================] - 160s 2s/step - loss: 0.4519 - accuracy: 0.8195 - val_loss: 0.3625 - val_accuracy: 0.8733
Epoch 22/25
72/72 [==============================] - 164s 2s/step - loss: 0.4467 - accuracy: 0.8204 - val_loss: 0.4472 - val_accuracy: 0.8073
Epoch 23/25
72/72 [==============================] - 164s 2s/step - loss: 0.3940 - accuracy: 0.8444 - val_loss: 0.2839 - val_accuracy: 0.8924
Epoch 24/25
72/72 [==============================] - 162s 2s/step - loss: 0.3886 - accuracy: 0.8474 - val_loss: 0.3186 - val_accuracy: 0.8767
Epoch 25/25
72/72 [==============================] - 160s 2s/step - loss: 0.3947 - accuracy: 0.8439 - val_loss: 0.3300 - val_accuracy: 0.8681
In [ ]:
#display train, val,test loss and train,val, test, accuracy
train_scores = model.evaluate(train_ds)
val_scores = model.evaluate(val_ds)
scores = model.evaluate(test_ds)
scores
72/72 [==============================] - 39s 547ms/step - loss: 0.3407 - accuracy: 0.8666
18/18 [==============================] - 10s 535ms/step - loss: 0.3300 - accuracy: 0.8681
13/13 [==============================] - 9s 427ms/step - loss: 2.6619 - accuracy: 0.5330
Out[ ]:
[2.661869525909424, 0.5329949259757996]
In [ ]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']
In [ ]:
def plot_evaluate(accuracy, validation_accuracy, loss, validation_loss, epochs):
    plt.figure(figsize=(10,10))
    plt.subplot(1,2,1)
    plt.plot(range(epochs), acc, label='Accuraccy')
    plt.plot(range(epochs), val_acc, label='Val_Accuraccy')
    plt.xlabel("epochs")
    plt.legend(loc='lower right')
    plt.title("Training and Validation Accuracy")

    plt.subplot(1,2,2)
    plt.plot(range(epochs), loss, label='Loss')
    plt.plot(range(epochs), val_loss, label='Val_Loss')
    plt.xlabel("epochs")
    plt.legend(loc='lower right')
    plt.title("Training and Validation Loss")
    plt.show()
In [ ]:
plot_evaluate(acc, val_acc, loss, val_loss, EPOCHS)
In [ ]:
## function to predict images

def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy())
    img_array = tf.expand_dims(img_array, 0)
    
    predictions = model.predict(img_array)
    
    predicted_class = ttclass_names[np.argmax(predictions[0])]
    confidence = round(100*(np.max(predictions[0])), 2)
    return predicted_class, confidence
In [ ]:
#Evaluate confidence of model on test set
plt.figure(figsize=(15,15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3,3, i + 1)
        plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class, confidence = predict(model, images[i].numpy())
        actual_class = class_names[labels[i]]
        
        plt.title(f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%")
        
        plt.axis("off")
1/1 [==============================] - 0s 439ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step

Confusion Matrix for Model 1¶

In [ ]:
def predict1(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy())
    img_array = tf.expand_dims(img_array, 0)
    
    predictions = model.predict(img_array)
    
    predicted_class = ttclass_names[np.argmax(predictions[0])]
    #confidence = round(100*(np.max(predictions[0])), 2)
    return predicted_class
In [ ]:
aclass = []
pclass = []
for images, labels in test_ds:
    for i in range(10):
        #ax = plt.subplot(3,3, i + 1)
        #plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class = predict1(model, images[i].numpy())
        actual_class = class_names[labels[i]]
        if actual_class == 'glioma_tumor':
            aclass.append(0)
        elif actual_class == 'meningioma_tumor':
            aclass.append(1)
        elif actual_class == 'no_tumor':
            aclass.append(2)
        elif actual_class == 'pituitary_tumor':
            aclass.append(3)
        
        if predicted_class == 'glioma_tumor':
            pclass.append(0)
        elif predicted_class == 'meningioma_tumor':
            pclass.append(1)
        elif predicted_class == 'no_tumor':
            pclass.append(2)
        elif predicted_class == 'pituitary_tumor':
            pclass.append(3)
        
        cm = confusion_matrix(np.array(aclass), np.array(pclass))
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 27ms/step
1/1 [==============================] - 0s 32ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 48ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 37ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 69ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 84ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 44ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 25ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 41ms/step
In [ ]:
#plotting a confusion matrix for CNN Model 1
plt.figure(figsize=(10,10))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.xlabel("Predicted labels", fontsize=14, weight='bold')
plt.ylabel("Actual labels", fontsize=14, weight='bold')
tick_marks = np.arange(4)
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
plt.tight_layout()
plt.title("Confusion Matrix for CNN model1")
Out[ ]:
Text(0.5, 1.0, 'Confusion Matrix for CNN model1')

Model 2¶

In [ ]:
model2 = models.Sequential([
    resize_rescale,
    data_augment,
    layers.Conv2D(32, kernel_size=(3,3), activation = 'relu', input_shape = input_shape),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Flatten(),
    layers.Dense(64, activation='relu', kernel_regularizer = regularizers.l2(0.001)),
    layers.Dense(n_classes, activation='softmax')
])

model2.build(input_shape = input_shape)
In [ ]:
model2.summary()
Model: "sequential_3"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 sequential (Sequential)     (None, 236, 236, 3)       0         
                                                                 
 sequential_1 (Sequential)   (None, 236, 236, 3)       0         
                                                                 
 conv2d_6 (Conv2D)           (32, 234, 234, 32)        896       
                                                                 
 max_pooling2d_6 (MaxPooling  (32, 117, 117, 32)       0         
 2D)                                                             
                                                                 
 conv2d_7 (Conv2D)           (32, 115, 115, 64)        18496     
                                                                 
 max_pooling2d_7 (MaxPooling  (32, 57, 57, 64)         0         
 2D)                                                             
                                                                 
 conv2d_8 (Conv2D)           (32, 55, 55, 64)          36928     
                                                                 
 max_pooling2d_8 (MaxPooling  (32, 27, 27, 64)         0         
 2D)                                                             
                                                                 
 conv2d_9 (Conv2D)           (32, 25, 25, 64)          36928     
                                                                 
 max_pooling2d_9 (MaxPooling  (32, 12, 12, 64)         0         
 2D)                                                             
                                                                 
 conv2d_10 (Conv2D)          (32, 10, 10, 64)          36928     
                                                                 
 max_pooling2d_10 (MaxPoolin  (32, 5, 5, 64)           0         
 g2D)                                                            
                                                                 
 conv2d_11 (Conv2D)          (32, 3, 3, 64)            36928     
                                                                 
 max_pooling2d_11 (MaxPoolin  (32, 1, 1, 64)           0         
 g2D)                                                            
                                                                 
 flatten_1 (Flatten)         (32, 64)                  0         
                                                                 
 dense_2 (Dense)             (32, 64)                  4160      
                                                                 
 dense_3 (Dense)             (32, 4)                   260       
                                                                 
=================================================================
Total params: 171,524
Trainable params: 171,524
Non-trainable params: 0
_________________________________________________________________
In [ ]:
model2.compile(
    optimizer="adam",
    loss =tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics = ['accuracy'],
    
)
In [ ]:
history2 = model2.fit(
    train_ds,
    verbose = 1,
    validation_data = val_ds,
    epochs = 25
)
Epoch 1/25
72/72 [==============================] - 167s 2s/step - loss: 1.3395 - accuracy: 0.3884 - val_loss: 1.2471 - val_accuracy: 0.4375
Epoch 2/25
72/72 [==============================] - 163s 2s/step - loss: 1.1955 - accuracy: 0.4765 - val_loss: 1.2220 - val_accuracy: 0.4948
Epoch 3/25
72/72 [==============================] - 160s 2s/step - loss: 1.1016 - accuracy: 0.5301 - val_loss: 1.0270 - val_accuracy: 0.6007
Epoch 4/25
72/72 [==============================] - 161s 2s/step - loss: 0.9698 - accuracy: 0.6146 - val_loss: 0.9158 - val_accuracy: 0.6632
Epoch 5/25
72/72 [==============================] - 172s 2s/step - loss: 0.9094 - accuracy: 0.6282 - val_loss: 0.8443 - val_accuracy: 0.6632
Epoch 6/25
72/72 [==============================] - 160s 2s/step - loss: 0.8487 - accuracy: 0.6452 - val_loss: 0.7638 - val_accuracy: 0.7083
Epoch 7/25
72/72 [==============================] - 160s 2s/step - loss: 0.8703 - accuracy: 0.6425 - val_loss: 0.7637 - val_accuracy: 0.6927
Epoch 8/25
72/72 [==============================] - 163s 2s/step - loss: 0.7625 - accuracy: 0.6787 - val_loss: 0.7824 - val_accuracy: 0.7031
Epoch 9/25
72/72 [==============================] - 161s 2s/step - loss: 0.7418 - accuracy: 0.6983 - val_loss: 0.7060 - val_accuracy: 0.7326
Epoch 10/25
72/72 [==============================] - 159s 2s/step - loss: 0.7118 - accuracy: 0.7092 - val_loss: 0.6886 - val_accuracy: 0.7378
Epoch 11/25
72/72 [==============================] - 161s 2s/step - loss: 0.6539 - accuracy: 0.7289 - val_loss: 0.5860 - val_accuracy: 0.7882
Epoch 12/25
72/72 [==============================] - 161s 2s/step - loss: 0.6683 - accuracy: 0.7302 - val_loss: 0.6404 - val_accuracy: 0.7760
Epoch 13/25
72/72 [==============================] - 160s 2s/step - loss: 0.6267 - accuracy: 0.7554 - val_loss: 0.6126 - val_accuracy: 0.7830
Epoch 14/25
72/72 [==============================] - 157s 2s/step - loss: 0.5881 - accuracy: 0.7624 - val_loss: 0.5324 - val_accuracy: 0.8212
Epoch 15/25
72/72 [==============================] - 158s 2s/step - loss: 0.5646 - accuracy: 0.7764 - val_loss: 0.5023 - val_accuracy: 0.8264
Epoch 16/25
72/72 [==============================] - 161s 2s/step - loss: 0.5318 - accuracy: 0.7873 - val_loss: 0.5259 - val_accuracy: 0.8316
Epoch 17/25
72/72 [==============================] - 161s 2s/step - loss: 0.5092 - accuracy: 0.8099 - val_loss: 0.4784 - val_accuracy: 0.8542
Epoch 18/25
72/72 [==============================] - 156s 2s/step - loss: 0.4974 - accuracy: 0.8134 - val_loss: 0.4386 - val_accuracy: 0.8681
Epoch 19/25
72/72 [==============================] - 160s 2s/step - loss: 0.4569 - accuracy: 0.8313 - val_loss: 0.4280 - val_accuracy: 0.8628
Epoch 20/25
72/72 [==============================] - 159s 2s/step - loss: 0.4424 - accuracy: 0.8400 - val_loss: 0.5748 - val_accuracy: 0.7778
Epoch 21/25
72/72 [==============================] - 156s 2s/step - loss: 0.4398 - accuracy: 0.8383 - val_loss: 0.4758 - val_accuracy: 0.8264
Epoch 22/25
72/72 [==============================] - 158s 2s/step - loss: 0.3906 - accuracy: 0.8505 - val_loss: 0.3352 - val_accuracy: 0.8854
Epoch 23/25
72/72 [==============================] - 160s 2s/step - loss: 0.4323 - accuracy: 0.8344 - val_loss: 0.3888 - val_accuracy: 0.8698
Epoch 24/25
72/72 [==============================] - 158s 2s/step - loss: 0.3867 - accuracy: 0.8553 - val_loss: 0.2826 - val_accuracy: 0.9062
Epoch 25/25
72/72 [==============================] - 157s 2s/step - loss: 0.3355 - accuracy: 0.8784 - val_loss: 0.3308 - val_accuracy: 0.8906
In [ ]:
#display train, val,test loss and train,val, test, accuracy
train_scores2 = model2.evaluate(train_ds)
val_scores2 = model2.evaluate(val_ds)
scores2 = model2.evaluate(test_ds)
scores2
72/72 [==============================] - 40s 546ms/step - loss: 0.3628 - accuracy: 0.8684
18/18 [==============================] - 10s 574ms/step - loss: 0.3308 - accuracy: 0.8906
13/13 [==============================] - 7s 501ms/step - loss: 1.9293 - accuracy: 0.5914
Out[ ]:
[1.9292805194854736, 0.5913705825805664]
In [ ]:
#accuracy
acc = history2.history['accuracy']
val_acc = history.history['val_accuracy']

#losses
loss = history2.history['accuracy']
val_loss = history2.history['val_accuracy']
In [ ]:
plot_evaluate(acc, val_acc, loss, val_loss, EPOCHS)

Confusion Matrix for Model 2¶

In [ ]:
aclass = []
pclass = []
for images, labels in test_ds:
    for i in range(10):
        #ax = plt.subplot(3,3, i + 1)
        #plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class = predict1(model2, images[i].numpy())
        actual_class = class_names[labels[i]]
        if actual_class == 'glioma_tumor':
            aclass.append(0)
        elif actual_class == 'meningioma_tumor':
            aclass.append(1)
        elif actual_class == 'no_tumor':
            aclass.append(2)
        elif actual_class == 'pituitary_tumor':
            aclass.append(3)
        
        if predicted_class == 'glioma_tumor':
            pclass.append(0)
        elif predicted_class == 'meningioma_tumor':
            pclass.append(1)
        elif predicted_class == 'no_tumor':
            pclass.append(2)
        elif predicted_class == 'pituitary_tumor':
            pclass.append(3)
        
        cm = confusion_matrix(np.array(aclass), np.array(pclass))
1/1 [==============================] - 1s 681ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 62ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 32ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 62ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 55ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 62ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 37ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 101ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 30ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 29ms/step
1/1 [==============================] - 0s 37ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 42ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 54ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 72ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 39ms/step
1/1 [==============================] - 0s 44ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 30ms/step
1/1 [==============================] - 0s 41ms/step
1/1 [==============================] - 0s 59ms/step
In [ ]:
#plotting a confusion matrix for CNN Model 2
plt.figure(figsize=(10,10))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.xlabel("Predicted labels", fontsize=14, weight='bold')
plt.ylabel("Actual labels", fontsize=14, weight='bold')
tick_marks = np.arange(4)
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
plt.tight_layout()
plt.title("Confusion Matrix for CNN model2")
Out[ ]:
Text(0.5, 1.0, 'Confusion Matrix for CNN model2')
In [ ]:
#Evaluate confidence of model on test set
plt.figure(figsize=(15,15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3,3, i + 1)
        plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class, confidence = predict(model2, images[i].numpy())
        actual_class = class_names[labels[i]]
        
        plt.title(f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%")
        
        plt.axis("off")
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step

Model 3¶

In [ ]:
#n_classes = 4
model3 = models.Sequential([
    resize_rescale,
    data_augment,
    layers.Conv2D(32, kernel_size=(3,3), activation = 'relu', input_shape = input_shape),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Flatten(),
    layers.Dense(512, activation='relu', kernel_regularizer = regularizers.l2(0.001)),
    layers.Dense(n_classes, activation='softmax')
])

model3.build(input_shape = input_shape)
In [ ]:
model3.compile(
    optimizer="adam",
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics = ['accuracy'],
)
In [ ]:
history3 = model3.fit(
    train_ds,
    verbose=1,
    validation_data = val_ds,
    epochs = 25,
)
Epoch 1/25
72/72 [==============================] - 166s 2s/step - loss: 1.3260 - accuracy: 0.4032 - val_loss: 1.2238 - val_accuracy: 0.5017
Epoch 2/25
72/72 [==============================] - 167s 2s/step - loss: 1.1519 - accuracy: 0.4935 - val_loss: 1.1467 - val_accuracy: 0.5052
Epoch 3/25
72/72 [==============================] - 162s 2s/step - loss: 1.0545 - accuracy: 0.5588 - val_loss: 1.0172 - val_accuracy: 0.5920
Epoch 4/25
72/72 [==============================] - 159s 2s/step - loss: 0.9573 - accuracy: 0.6037 - val_loss: 0.9031 - val_accuracy: 0.6337
Epoch 5/25
72/72 [==============================] - 163s 2s/step - loss: 0.8876 - accuracy: 0.6447 - val_loss: 0.9096 - val_accuracy: 0.6267
Epoch 6/25
72/72 [==============================] - 160s 2s/step - loss: 0.8595 - accuracy: 0.6622 - val_loss: 0.8401 - val_accuracy: 0.6615
Epoch 7/25
72/72 [==============================] - 160s 2s/step - loss: 0.8133 - accuracy: 0.6635 - val_loss: 0.8464 - val_accuracy: 0.6562
Epoch 8/25
72/72 [==============================] - 162s 2s/step - loss: 0.7654 - accuracy: 0.7014 - val_loss: 0.7603 - val_accuracy: 0.7153
Epoch 9/25
72/72 [==============================] - 161s 2s/step - loss: 0.7342 - accuracy: 0.7071 - val_loss: 0.7299 - val_accuracy: 0.7170
Epoch 10/25
72/72 [==============================] - 160s 2s/step - loss: 0.6965 - accuracy: 0.7232 - val_loss: 0.6896 - val_accuracy: 0.7396
Epoch 11/25
72/72 [==============================] - 160s 2s/step - loss: 0.6680 - accuracy: 0.7389 - val_loss: 0.5890 - val_accuracy: 0.7708
Epoch 12/25
72/72 [==============================] - 158s 2s/step - loss: 0.6192 - accuracy: 0.7502 - val_loss: 0.7274 - val_accuracy: 0.6962
Epoch 13/25
72/72 [==============================] - 160s 2s/step - loss: 0.6422 - accuracy: 0.7502 - val_loss: 0.6135 - val_accuracy: 0.7708
Epoch 14/25
72/72 [==============================] - 162s 2s/step - loss: 0.5998 - accuracy: 0.7650 - val_loss: 0.5490 - val_accuracy: 0.7778
Epoch 15/25
72/72 [==============================] - 160s 2s/step - loss: 0.5518 - accuracy: 0.7868 - val_loss: 0.5800 - val_accuracy: 0.7812
Epoch 16/25
72/72 [==============================] - 162s 2s/step - loss: 0.5303 - accuracy: 0.8003 - val_loss: 0.4866 - val_accuracy: 0.8368
Epoch 17/25
72/72 [==============================] - 159s 2s/step - loss: 0.4918 - accuracy: 0.8182 - val_loss: 0.4529 - val_accuracy: 0.8420
Epoch 18/25
72/72 [==============================] - 159s 2s/step - loss: 0.4700 - accuracy: 0.8405 - val_loss: 0.4392 - val_accuracy: 0.8472
Epoch 19/25
72/72 [==============================] - 163s 2s/step - loss: 0.4455 - accuracy: 0.8344 - val_loss: 0.3768 - val_accuracy: 0.8698
Epoch 20/25
72/72 [==============================] - 161s 2s/step - loss: 0.4612 - accuracy: 0.8230 - val_loss: 0.5770 - val_accuracy: 0.7830
Epoch 21/25
72/72 [==============================] - 160s 2s/step - loss: 0.4384 - accuracy: 0.8448 - val_loss: 0.3567 - val_accuracy: 0.8819
Epoch 22/25
72/72 [==============================] - 162s 2s/step - loss: 0.3900 - accuracy: 0.8601 - val_loss: 0.3392 - val_accuracy: 0.8889
Epoch 23/25
72/72 [==============================] - 161s 2s/step - loss: 0.3643 - accuracy: 0.8749 - val_loss: 0.3123 - val_accuracy: 0.8906
Epoch 24/25
72/72 [==============================] - 165s 2s/step - loss: 0.4055 - accuracy: 0.8570 - val_loss: 0.3613 - val_accuracy: 0.8681
Epoch 25/25
72/72 [==============================] - 162s 2s/step - loss: 0.3539 - accuracy: 0.8684 - val_loss: 0.3548 - val_accuracy: 0.8576
In [ ]:
#display train, val,test loss and train,val, test, accuracy
train_scores3 = model3.evaluate(train_ds)
val_scores3 = model3.evaluate(val_ds)
scores3 = model3.evaluate(test_ds)
scores3
72/72 [==============================] - 41s 570ms/step - loss: 0.3734 - accuracy: 0.8601
18/18 [==============================] - 9s 479ms/step - loss: 0.3548 - accuracy: 0.8576
13/13 [==============================] - 6s 457ms/step - loss: 2.7420 - accuracy: 0.6142
Out[ ]:
[2.7419936656951904, 0.6142131686210632]
In [ ]:
acc = history3.history['accuracy']
val_acc = history3.history['val_accuracy']

loss = history3.history['loss']
val_loss = history3.history['val_loss']
In [ ]:
plot_evaluate(acc, val_acc, loss, val_acc, 25)

Confusion Matrix for Model 3¶

In [ ]:
aclass = []
pclass = []
for images, labels in test_ds:
    for i in range(10):
        #ax = plt.subplot(3,3, i + 1)
        #plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class = predict1(model3, images[i].numpy())
        actual_class = class_names[labels[i]]
        if actual_class == 'glioma_tumor':
            aclass.append(0)
        elif actual_class == 'meningioma_tumor':
            aclass.append(1)
        elif actual_class == 'no_tumor':
            aclass.append(2)
        elif actual_class == 'pituitary_tumor':
            aclass.append(3)
        
        if predicted_class == 'glioma_tumor':
            pclass.append(0)
        elif predicted_class == 'meningioma_tumor':
            pclass.append(1)
        elif predicted_class == 'no_tumor':
            pclass.append(2)
        elif predicted_class == 'pituitary_tumor':
            pclass.append(3)
        
        cm = confusion_matrix(np.array(aclass), np.array(pclass))
1/1 [==============================] - 0s 332ms/step
1/1 [==============================] - 0s 41ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 85ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 69ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 131ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 116ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 85ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 69ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 37ms/step
1/1 [==============================] - 0s 85ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 41ms/step
1/1 [==============================] - 0s 24ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 45ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 40ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 23ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 57ms/step
In [ ]:
#plotting a confusion matrix for CNN Model 3
plt.figure(figsize=(10,10))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.xlabel("Predicted labels", fontsize=14, weight='bold')
plt.ylabel("Actual labels", fontsize=14, weight='bold')
tick_marks = np.arange(4)
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
plt.tight_layout()
plt.title("Confusion Matrix for CNN model3")
Out[ ]:
Text(0.5, 1.0, 'Confusion Matrix for CNN model3')
In [ ]:
#Evaluate confidence of model on test set
plt.figure(figsize=(15,15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3,3, i + 1)
        plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class, confidence = predict(model3, images[i].numpy())
        actual_class = class_names[labels[i]]
        
        plt.title(f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%")
        
        plt.axis("off")
1/1 [==============================] - 0s 116ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 38ms/step

Model 4¶

In [ ]:
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)

model4 = models.Sequential([
    #Data augmentation
    resize_rescale,
    data_augment,
    
    #feature learning
    layers.Conv2D(32, kernel_size=(3,3), activation = 'relu', input_shape = input_shape),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    layers.Conv2D(64, (3,3), activation = 'relu'),
    layers.MaxPooling2D((2,2)),
    
    
    #flattening tensors
    layers.Flatten(),
    
    #fully connected layers
    #layers.Dense(512, activation='relu', kernel_regularizer = regularizers.l2(0.01)),
    layers.Dense(1028, activation='relu', kernel_regularizer = regularizers.l2(0.01)),
    
    #output layer
    layers.Dense(n_classes, activation='softmax')
])

model4.build(input_shape = input_shape)
In [ ]:
model4.compile(
    optimizer="Adam",
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics = ['accuracy'],
)
In [ ]:
history4 = model4.fit(
    train_ds,
    verbose = 1,
    epochs = 25,
    validation_data = val_ds,
)
Epoch 1/25
72/72 [==============================] - 191s 3s/step - loss: 1.6861 - accuracy: 0.3915 - val_loss: 1.2471 - val_accuracy: 0.5139
Epoch 2/25
72/72 [==============================] - 166s 2s/step - loss: 1.1709 - accuracy: 0.4987 - val_loss: 1.1484 - val_accuracy: 0.5000
Epoch 3/25
72/72 [==============================] - 169s 2s/step - loss: 1.0552 - accuracy: 0.5562 - val_loss: 1.0353 - val_accuracy: 0.5747
Epoch 4/25
72/72 [==============================] - 165s 2s/step - loss: 0.9699 - accuracy: 0.6103 - val_loss: 0.9304 - val_accuracy: 0.6667
Epoch 5/25
72/72 [==============================] - 167s 2s/step - loss: 0.9242 - accuracy: 0.6303 - val_loss: 1.1717 - val_accuracy: 0.5365
Epoch 6/25
72/72 [==============================] - 167s 2s/step - loss: 0.8750 - accuracy: 0.6395 - val_loss: 0.7836 - val_accuracy: 0.7309
Epoch 7/25
72/72 [==============================] - 166s 2s/step - loss: 0.8440 - accuracy: 0.6609 - val_loss: 0.8349 - val_accuracy: 0.6840
Epoch 8/25
72/72 [==============================] - 166s 2s/step - loss: 0.7725 - accuracy: 0.6866 - val_loss: 0.7764 - val_accuracy: 0.7118
Epoch 9/25
72/72 [==============================] - 164s 2s/step - loss: 0.7751 - accuracy: 0.6970 - val_loss: 0.7233 - val_accuracy: 0.7031
Epoch 10/25
72/72 [==============================] - 165s 2s/step - loss: 0.7030 - accuracy: 0.7132 - val_loss: 0.6584 - val_accuracy: 0.7535
Epoch 11/25
72/72 [==============================] - 171s 2s/step - loss: 0.6859 - accuracy: 0.7293 - val_loss: 0.6569 - val_accuracy: 0.7569
Epoch 12/25
72/72 [==============================] - 163s 2s/step - loss: 0.6709 - accuracy: 0.7398 - val_loss: 0.6124 - val_accuracy: 0.7587
Epoch 13/25
72/72 [==============================] - 166s 2s/step - loss: 0.6384 - accuracy: 0.7607 - val_loss: 0.6119 - val_accuracy: 0.7552
Epoch 14/25
72/72 [==============================] - 166s 2s/step - loss: 0.6144 - accuracy: 0.7594 - val_loss: 0.6061 - val_accuracy: 0.7882
Epoch 15/25
72/72 [==============================] - 164s 2s/step - loss: 0.5754 - accuracy: 0.7812 - val_loss: 0.5312 - val_accuracy: 0.8299
Epoch 16/25
72/72 [==============================] - 170s 2s/step - loss: 0.5336 - accuracy: 0.7986 - val_loss: 0.5029 - val_accuracy: 0.8264
Epoch 17/25
72/72 [==============================] - 163s 2s/step - loss: 0.5379 - accuracy: 0.7964 - val_loss: 0.4782 - val_accuracy: 0.8160
Epoch 18/25
72/72 [==============================] - 165s 2s/step - loss: 0.5164 - accuracy: 0.8069 - val_loss: 0.4736 - val_accuracy: 0.8281
Epoch 19/25
72/72 [==============================] - 167s 2s/step - loss: 0.4738 - accuracy: 0.8313 - val_loss: 0.4471 - val_accuracy: 0.8559
Epoch 20/25
72/72 [==============================] - 163s 2s/step - loss: 0.4508 - accuracy: 0.8391 - val_loss: 0.4899 - val_accuracy: 0.8194
Epoch 21/25
72/72 [==============================] - 166s 2s/step - loss: 0.4448 - accuracy: 0.8444 - val_loss: 0.5073 - val_accuracy: 0.8212
Epoch 22/25
72/72 [==============================] - 173s 2s/step - loss: 0.4262 - accuracy: 0.8452 - val_loss: 0.3658 - val_accuracy: 0.8924
Epoch 23/25
72/72 [==============================] - 168s 2s/step - loss: 0.4244 - accuracy: 0.8514 - val_loss: 0.3486 - val_accuracy: 0.8872
Epoch 24/25
72/72 [==============================] - 171s 2s/step - loss: 0.4187 - accuracy: 0.8444 - val_loss: 0.3859 - val_accuracy: 0.8785
Epoch 25/25
72/72 [==============================] - 168s 2s/step - loss: 0.3671 - accuracy: 0.8762 - val_loss: 0.4063 - val_accuracy: 0.8663
In [ ]:
#display train, val,test loss and train,val, test, accuracy
train_scores4 = model4.evaluate(train_ds)
val_scores4 = model4.evaluate(val_ds)
scores4 = model4.evaluate(test_ds)
scores4
72/72 [==============================] - 36s 499ms/step - loss: 0.4383 - accuracy: 0.8466
18/18 [==============================] - 9s 472ms/step - loss: 0.4063 - accuracy: 0.8663
13/13 [==============================] - 14s 1s/step - loss: 2.3910 - accuracy: 0.5457
Out[ ]:
[2.3909764289855957, 0.5456852912902832]
In [ ]:
acc = history4.history['accuracy']
val_acc = history4.history['val_accuracy']

loss = history4.history['loss']
val_loss = history4.history['val_loss']
In [ ]:
plot_evaluate(acc, val_acc, loss, val_acc, 25)

Confusion Matrix for Model 4¶

In [ ]:
aclass = []
pclass = []
for images, labels in test_ds:
    for i in range(10):
        #ax = plt.subplot(3,3, i + 1)
        #plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class = predict1(model4, images[i].numpy())
        actual_class = class_names[labels[i]]
        if actual_class == 'glioma_tumor':
            aclass.append(0)
        elif actual_class == 'meningioma_tumor':
            aclass.append(1)
        elif actual_class == 'no_tumor':
            aclass.append(2)
        elif actual_class == 'pituitary_tumor':
            aclass.append(3)
        
        if predicted_class == 'glioma_tumor':
            pclass.append(0)
        elif predicted_class == 'meningioma_tumor':
            pclass.append(1)
        elif predicted_class == 'no_tumor':
            pclass.append(2)
        elif predicted_class == 'pituitary_tumor':
            pclass.append(3)
        
        cm = confusion_matrix(np.array(aclass), np.array(pclass))
1/1 [==============================] - 2s 2s/step
1/1 [==============================] - 1s 517ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 231ms/step
1/1 [==============================] - 0s 202ms/step
1/1 [==============================] - 0s 116ms/step
1/1 [==============================] - 0s 165ms/step
1/1 [==============================] - 0s 149ms/step
1/1 [==============================] - 0s 141ms/step
1/1 [==============================] - 0s 239ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 69ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 71ms/step
1/1 [==============================] - 0s 182ms/step
1/1 [==============================] - 0s 100ms/step
1/1 [==============================] - 0s 83ms/step
1/1 [==============================] - 0s 165ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 82ms/step
1/1 [==============================] - 0s 250ms/step
1/1 [==============================] - 0s 65ms/step
1/1 [==============================] - 0s 164ms/step
1/1 [==============================] - 0s 44ms/step
1/1 [==============================] - 0s 100ms/step
1/1 [==============================] - 0s 84ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 118ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 100ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 84ms/step
1/1 [==============================] - 0s 83ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 150ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 397ms/step
1/1 [==============================] - 0s 66ms/step
1/1 [==============================] - 0s 56ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 65ms/step
1/1 [==============================] - 0s 30ms/step
1/1 [==============================] - 0s 150ms/step
1/1 [==============================] - 0s 99ms/step
1/1 [==============================] - 0s 73ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 151ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 32ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 85ms/step
1/1 [==============================] - 0s 40ms/step
1/1 [==============================] - 0s 66ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 42ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 43ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 103ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 100ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 41ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 83ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 82ms/step
1/1 [==============================] - 0s 63ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 61ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 92ms/step
1/1 [==============================] - 0s 84ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 170ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 36ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 94ms/step
1/1 [==============================] - 0s 58ms/step
1/1 [==============================] - 0s 84ms/step
1/1 [==============================] - 0s 117ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 54ms/step
1/1 [==============================] - 0s 64ms/step
1/1 [==============================] - 0s 39ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 61ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 37ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 30ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 40ms/step
1/1 [==============================] - 0s 62ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 62ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 34ms/step
1/1 [==============================] - 0s 60ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 117ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 46ms/step
1/1 [==============================] - 0s 33ms/step
In [ ]:
plt.figure(figsize=(15,15))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.xlabel("Predicted labels")
plt.ylabel("Actual labels")
plt.title("Confusion Matrix for CNN model1")
Out[ ]:
Text(0.5, 1.0, 'Confusion Matrix for CNN model1')
In [ ]:
#Evaluate confidence of model on test set
plt.figure(figsize=(15,15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3,3, i + 1)
        plt.imshow(images[i].numpy(). astype("uint8"))
        
        predicted_class, confidence = predict(model4, images[i].numpy())
        actual_class = class_names[labels[i]]
        
        plt.title(f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%")
        
        plt.axis("off")
1/1 [==============================] - 0s 69ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 47ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 69ms/step
1/1 [==============================] - 0s 125ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 100ms/step

Conclusion¶

In [ ]:
#Summary of Evaluation results
dic = [
    {"Type": "CNN 1", "Train_Accuracy": train_scores[1],"Validation_Accuracy": val_scores[1],"Test_Accuracy": scores[1], "Train_loss": train_scores[0], "Validation_loss": val_scores[0], "Test_loss": scores[0]},
    {"Type": "CNN 2", "Train_Accuracy": train_scores2[1],"Validation_Accuracy": val_scores2[1],"Test_Accuracy": scores2[1], "Train_loss": train_scores2[0], "Validation_loss": val_scores2[0], "Test_loss": scores2[0]},
    {"Type": "CNN 3", "Train_Accuracy": train_scores3[1],"Validation_Accuracy": val_scores3[1],"Test_Accuracy": scores3[1], "Train_loss": train_scores3[0], "Validation_loss": val_scores3[0], "Test_loss": scores3[0]},
    {"Type": "CNN 4", "Train_Accuracy": train_scores4[1],"Validation_Accuracy": val_scores4[1],"Test_Accuracy": scores4[1], "Train_loss": train_scores4[0], "Validation_loss": val_scores4[0], "Test_loss": scores[0]},  
]

table_summary = pd.DataFrame(dic)
table_summary
Out[ ]:
Type Train_Accuracy Validation_Accuracy Test_Accuracy Train_loss Validation_loss Test_loss
0 CNN 1 0.866609 0.868056 0.532995 0.340729 0.330044 2.661870
1 CNN 2 0.868352 0.890625 0.591371 0.362849 0.330834 1.929281
2 CNN 3 0.860070 0.857639 0.614213 0.373434 0.354774 2.741994
3 CNN 4 0.846556 0.866319 0.545685 0.438328 0.406294 2.661870
In [ ]:
#plt.figure()
#table_summary.plot(kind='bar', labels=['CNN1','CNN2','CNN3','CNN4'])
#plt.subplot(1,2,1)
fig = px.bar(table_summary, x='Type',
             y=['Train_loss','Validation_loss','Test_loss'],
             barmode='group',
             text_auto=True,
             labels={"value": "Loss"},
             title="Loss values for each model"
             )

#plt.subplot(1,2,2)
fig1 = px.bar(table_summary, x='Type',
              y=['Train_Accuracy','Validation_Accuracy','Test_Accuracy'], 
              barmode='group', 
              text_auto=True,
              labels={"value": "Accuracy"},
              title="Accuracy values for each model"
              )
#plt.legend()
fig.show()
fig1.show()
In [ ]: